#include <xeno/dom0_ops.h>
#include <asm/io.h>
#include <asm/domain_page.h>
+ #include <asm/flushtlb.h>
+#include <asm/msr.h>
+#include <xeno/multiboot.h>
+
+#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
+#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
+
+extern int nr_mods;
+extern module_t *mod;
+extern unsigned char *cmdline;
rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;
* shared_info:
* <one page>
*/
-#define MB_PER_DOMAIN 16
-#include <asm/msr.h>
-#include <xeno/multiboot.h>
-extern int nr_mods;
-extern module_t *mod;
-extern unsigned char *cmdline;
+
+/*
+ * final_setup_guestos: finish building a guest OS whose memory image and
+ * page tables were prepared by the userspace domain builder in dom0.
+ *
+ * p       -- the task to complete; its page tables, shared_info and
+ *            start_info are filled in here.
+ * meminfo -- layout description from the builder (L2 page-table address,
+ *            load/stack/shinfo/startinfo virtual addresses, vif count).
+ *
+ * Returns 0 on success.  NOTE(review): meminfo->num_vifs is consumed
+ * (decremented past zero) by the vif-creation loop below -- confirm
+ * callers do not reuse it afterwards.
+ */
+int final_setup_guestos(struct task_struct * p, dom_meminfo_t * meminfo)
+{
+    l2_pgentry_t * l2tab;
+    l1_pgentry_t * l1tab;
+    start_info_t * virt_startinfo_addr;
+    unsigned long virt_stack_addr;
+    unsigned long long time;
+    unsigned long phys_l2tab;
+    net_ring_t *net_ring;
+    net_vif_t *net_vif;
+    char *dst;      // temporary
+    int i;          // temporary
+
+    /* entries 0xe0000000 onwards in page table must contain hypervisor
+     * mem mappings - set them up.
+     */
+    phys_l2tab = meminfo->l2_pgt_addr;
+    l2tab = map_domain_mem(phys_l2tab);
+    memcpy(l2tab + DOMAIN_ENTRIES_PER_L2_PAGETABLE,
+        ((l2_pgentry_t *)idle_pg_table[p->processor]) +
+        DOMAIN_ENTRIES_PER_L2_PAGETABLE,
+        (ENTRIES_PER_L2_PAGETABLE - DOMAIN_ENTRIES_PER_L2_PAGETABLE)
+        * sizeof(l2_pgentry_t));
+    l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
+        mk_l2_pgentry(__pa(p->mm.perdomain_pt) | PAGE_HYPERVISOR);
+    p->mm.pagetable = mk_pagetable(phys_l2tab);
+    unmap_domain_mem(l2tab);
+
+    /* map in the shared info structure at the builder-chosen address */
+    phys_l2tab = pagetable_val(p->mm.pagetable);
+    l2tab = map_domain_mem(phys_l2tab);
+    l2tab += l2_table_offset(meminfo->virt_shinfo_addr);
+    l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
+    l1tab += l1_table_offset(meminfo->virt_shinfo_addr);
+    *l1tab = mk_l1_pgentry(__pa(p->shared_info) | L1_PROT);
+    unmap_domain_mem((void *)((unsigned long)l2tab & PAGE_MASK));
+    unmap_domain_mem((void *)((unsigned long)l1tab & PAGE_MASK));
+
+    /* set up the shared info structure */
+    rdtscll(time);
+    p->shared_info->wall_time    = time;
+    p->shared_info->domain_time  = time;
+    p->shared_info->ticks_per_ms = ticks_per_usec * 1000;
+
+    /* we pass start info struct to guest os as function parameter on stack */
+    virt_startinfo_addr = (start_info_t *)meminfo->virt_startinfo_addr;
+    virt_stack_addr = (unsigned long)virt_startinfo_addr;
+
+    /* we need to populate start_info struct within the context of the
+     * new domain. thus, temporarily install its pagetables.
+     */
+    __cli();
+    __asm__ __volatile__ (
+        "mov %%eax,%%cr3" : : "a" (pagetable_val(p->mm.pagetable)));
+
+    /* Zero the whole structure; the previous sizeof(virt_startinfo_addr)
+     * only cleared sizeof(pointer) bytes of it. */
+    memset(virt_startinfo_addr, 0, sizeof(*virt_startinfo_addr));
+    virt_startinfo_addr->nr_pages = p->tot_pages;
+    virt_startinfo_addr->shared_info = (shared_info_t *)meminfo->virt_shinfo_addr;
+    virt_startinfo_addr->pt_base = meminfo->virt_load_addr +
+                    ((p->tot_pages - 1) << PAGE_SHIFT);
+
+    /* Add virtual network interfaces and point to them in startinfo. */
+    while (meminfo->num_vifs-- > 0) {
+        net_vif = create_net_vif(p->domain);
+        net_ring = net_vif->net_ring;
+        if (!net_ring) panic("no network ring!\n");
+    }
+
+/* XXX SMH: horrible hack to convert hypervisor VAs in SHIP to guest VAs */
+#define SH2G(_x) (meminfo->virt_shinfo_addr | (((unsigned long)(_x)) & 0xFFF))
+
+    virt_startinfo_addr->net_rings = (net_ring_t *)SH2G(p->net_ring_base);
+    virt_startinfo_addr->num_net_rings = p->num_net_vifs;
+
+    /* Add block io interface */
+    virt_startinfo_addr->blk_ring = (blk_ring_t *)SH2G(p->blk_ring_base);
+
+    /* Copy the command line from boot module 0 (bounded at 255 chars). */
+    dst = virt_startinfo_addr->cmd_line;
+    if ( mod[0].string )
+    {
+        char *modline = (char *)__va(mod[0].string);
+        for ( i = 0; i < 255; i++ )
+        {
+            if ( modline[i] == '\0' ) break;
+            *dst++ = modline[i];
+        }
+    }
+    *dst = '\0';
+
+    if ( opt_nfsroot )
+    {
+        /* Buffer enlarged from 150: four dotted quads plus a 69-char
+         * nfsroot can exceed 150 bytes, and the old code additionally
+         * told snprintf the buffer was 200 bytes long. */
+        unsigned char boot[256];
+        unsigned char ipbase[20], nfsserv[20], gateway[20], netmask[20];
+        unsigned char nfsroot[70];
+        snprintf(nfsroot, sizeof(nfsroot), opt_nfsroot, p->domain);
+        snprintf(boot, sizeof(boot),
+            " root=/dev/nfs ip=%s:%s:%s:%s::eth0:off nfsroot=%s",
+            quad_to_str(opt_ipbase + p->domain, ipbase),
+            quad_to_str(opt_nfsserv, nfsserv),
+            quad_to_str(opt_gateway, gateway),
+            quad_to_str(opt_netmask, netmask),
+            nfsroot);
+        /* NOTE(review): cmd_line's size is not visible here -- confirm it
+         * can hold the module command line plus this NFS suffix. */
+        strcpy(dst, (char *)boot);
+    }
+
+    /* Reinstate the caller's page tables. */
+    __asm__ __volatile__ (
+        "mov %%eax,%%cr3" : : "a" (pagetable_val(current->mm.pagetable)));
+    __sti();
+
+    /* Give the domain an initial thread: entry at the load address, stack
+     * at (and start_info parameter pointing to) virt_startinfo_addr. */
+    new_thread(p,
+               (unsigned long)meminfo->virt_load_addr,
+               (unsigned long)virt_stack_addr,
+               (unsigned long)virt_startinfo_addr);
+
+    return 0;
+}
+
+/* Take the frame preceding *cur_addr on the domain's page list.
+ * Updates *cur_addr to that frame's physical address, decrements the
+ * remaining-page counter *index, and returns the new address.
+ */
+static unsigned long alloc_page_from_domain(unsigned long * cur_addr,
+                                            unsigned long * index)
+{
+    unsigned long pfn = *cur_addr >> PAGE_SHIFT;
+    struct list_head *prev_ent = frame_table[pfn].list.prev;
+    struct pfn_info *pg = list_entry(prev_ent, struct pfn_info, list);
+
+    (*index)--;
+    *cur_addr = ((unsigned long)(pg - frame_table)) << PAGE_SHIFT;
+    return *cur_addr;
+}
+
+/* setup_guestos is used for building dom0 solely. other domains are built in
+ * userspace dom0 and final setup is being done by final_setup_guestos.
+ */
int setup_guestos(struct task_struct *p, dom0_newdomain_t *params)
{
-#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
-#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
-#define ALLOC_FRAME_FROM_DOMAIN() (alloc_address -= PAGE_SIZE)
++
+    struct list_head *list_ent;
    char *src, *dst;
    int i, dom = p->domain;
-    unsigned long start_address, phys_l1tab, phys_l2tab;
-    unsigned long cur_address, end_address, alloc_address, vaddr;
+    unsigned long phys_l1tab, phys_l2tab;
+    unsigned long cur_address, alloc_address;
    unsigned long virt_load_address, virt_stack_address, virt_shinfo_address;
-    unsigned long virt_ftable_start_addr = 0, virt_ftable_end_addr;
-    unsigned long ft_mapping = (unsigned long)frame_table;
-    unsigned int ft_size = 0;
    start_info_t *virt_startinfo_address;
    unsigned long long time;
+    unsigned long count;
+    unsigned long alloc_index;
    l2_pgentry_t *l2tab, *l2start;
-    l1_pgentry_t *l1tab = NULL;
+    l1_pgentry_t *l1tab = NULL, *l1start = NULL;
    struct pfn_info *page = NULL;
    net_ring_t *net_ring;
    net_vif_t *net_vif;
    max_page = nr_pages;
    frame_table_size = nr_pages * sizeof(struct pfn_info);
    frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
-    free_pfns = nr_pages -
-        ((MAX_MONITOR_ADDRESS + frame_table_size) >> PAGE_SHIFT);
-
-    frame_table = phys_to_virt(MAX_MONITOR_ADDRESS);
+    /* Frame table moves to a fixed virtual address; free_pfns is now
+     * computed from its physical address after the table is cleared. */
+    frame_table = (frame_table_t *)FRAMETABLE_VIRT_START;
    memset(frame_table, 0, frame_table_size);
+    free_pfns = nr_pages -
+        ((__pa(frame_table) + frame_table_size) >> PAGE_SHIFT);
+
    /* Put all domain-allocatable memory on a free list. */
+    /* NOTE(review): 'flags' used by spin_lock_irqsave is not declared in
+     * any visible hunk, and no matching spin_unlock_irqrestore is visible
+     * here -- confirm both appear elsewhere in the full patch. */
+    spin_lock_irqsave(&free_list_lock, flags);
    INIT_LIST_HEAD(&free_list);
-    for( page_index = (MAX_MONITOR_ADDRESS + frame_table_size) >> PAGE_SHIFT;
+    for( page_index = (__pa(frame_table) + frame_table_size) >> PAGE_SHIFT;
        page_index < nr_pages;
        page_index++ )
    {
    return err;
}
-
-/* Apply updates to page table @pagetable_id within the current domain. */
-int do_process_page_updates(page_update_request_t *updates, int count)
+int do_process_page_updates(page_update_request_t *ureqs, int count)
{
- page_update_request_t cur;
+ page_update_request_t req;
unsigned long flags, pfn;
struct pfn_info *page;
int err = 0, i;
if ( tlb_flush[smp_processor_id()] )
{
tlb_flush[smp_processor_id()] = 0;
- __asm__ __volatile__ (
- "movl %%eax,%%cr3" : :
- "a" (pagetable_val(current->mm.pagetable)));
+ __write_cr3_counted(pagetable_val(current->mm.pagetable));
+
}
return(0);